Ensure block/yield hypercalls always return a sane return code.
Ensure callers of __enter_scheduler take appropriate arch-specific
action if no context switch occurs (callers from arch/x86 do not
expect to return from a call into the scheduler).
This fixes wildly unintuitive behaviour of do_block() for the
VMX team.
Signed-off-by: Keir Fraser <keir@xensource.com>
if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
}
+/*
+ * Scheduler hook, called instead of context_switch() when the scheduler
+ * re-selects the vcpu that is already running.  On this architecture the
+ * callers of __enter_scheduler simply return, so there is no unwinding
+ * to perform here (contrast with the x86 variant, which must exit via
+ * schedule_tail()).
+ */
+void continue_running(struct exec_domain *same)
+{
+ /* nothing to do */
+}
+
void panic_domain(struct pt_regs *regs, const char *fmt, ...)
{
va_list args;
clear_bit(EDF_RUNNING, &prev->ed_flags);
schedule_tail(next);
+ BUG();
+}
+/*
+ * Scheduler hook, called instead of context_switch() when the scheduler
+ * re-selects the currently-running vcpu.  x86 callers of the scheduler do
+ * not expect a normal return, so unwind through schedule_tail() exactly as
+ * context_switch() does; control must never reach the BUG() below.
+ */
+void continue_running(struct exec_domain *same)
+{
+ schedule_tail(same);
BUG();
}
addl $16,%esp
ret
+# Arch wrapper for the sched_op hypercall.  Paths that block/yield leave
+# via schedule_tail() and never return through the normal hypercall exit,
+# so the saved guest EAX slot would keep a stale value.  Pre-store zero
+# (success) into the saved register frame before dispatching.
+do_arch_sched_op:
+ # Ensure we return success even if we return via schedule_tail()
+ xorl %eax,%eax
+ movl %eax,UREGS_eax+4(%esp)
+ jmp SYMBOL_NAME(do_sched_op)
+
do_switch_vm86:
# Discard the return address
addl $4,%esp
.long SYMBOL_NAME(do_stack_switch)
.long SYMBOL_NAME(do_set_callbacks)
.long SYMBOL_NAME(do_fpu_taskswitch) /* 5 */
- .long SYMBOL_NAME(do_sched_op)
+ .long SYMBOL_NAME(do_arch_sched_op)
.long SYMBOL_NAME(do_dom0_op)
.long SYMBOL_NAME(do_set_debugreg)
.long SYMBOL_NAME(do_get_debugreg)
call SYMBOL_NAME(do_nmi)
jmp restore_all_xen
+# x86-64 counterpart of the 32-bit do_arch_sched_op wrapper: pre-store a
+# zero (success) return code into the saved guest RAX slot, so hypercall
+# paths that exit via schedule_tail() still report success to the guest.
+do_arch_sched_op:
+ # Ensure we return success even if we return via schedule_tail()
+ xorl %eax,%eax
+ movq %rax,UREGS_rax+8(%rsp)
+ jmp SYMBOL_NAME(do_sched_op)
+
.data
ENTRY(exception_table)
.quad SYMBOL_NAME(do_stack_switch)
.quad SYMBOL_NAME(do_set_callbacks)
.quad SYMBOL_NAME(do_fpu_taskswitch) /* 5 */
- .quad SYMBOL_NAME(do_sched_op)
+ .quad SYMBOL_NAME(do_arch_sched_op)
.quad SYMBOL_NAME(do_dom0_op)
.quad SYMBOL_NAME(do_set_debugreg)
.quad SYMBOL_NAME(do_get_debugreg)
/* Check for events /after/ blocking: avoids wakeup waiting race. */
if ( event_pending(ed) )
+ {
clear_bit(EDF_BLOCKED, &ed->ed_flags);
+ }
else
{
TRACE_2D(TRC_SCHED_BLOCK, ed->domain->id, ed->eid);
spin_unlock_irq(&schedule_data[cpu].schedule_lock);
if ( unlikely(prev == next) )
- return;
+ return continue_running(prev);
perfc_incrc(sched_ctx);
atomic_inc(&d->refcnt);
ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTRUCTED));
}
-
+
extern struct domain *do_createdomain(
domid_t dom_id, unsigned int cpu);
extern int construct_dom0(
extern void sync_lazy_execstate_all(void);
extern int __sync_lazy_execstate(void);
+/* Called by the scheduler to switch to another exec_domain. */
extern void context_switch(
struct exec_domain *prev,
struct exec_domain *next);
+/* Called by the scheduler to continue running the current exec_domain. */
+extern void continue_running(
+ struct exec_domain *same);
+
void domain_init(void);
int idle_cpu(int cpu); /* Is CPU 'cpu' idle right now? */